import

import torch 
from fastai.vision.all import *
import cv2

data

# Download (or reuse a cached copy of) the Oxford-IIIT Pet dataset and point at its images dir.
path=untar_data(URLs.PETS)/'images'
path
# (REPL echo of the value above)
Path('/home/khy/.fastai/data/oxford-iiit-pet/images')
# Collect every image file path under the dataset directory.
files=get_image_files(path)
def label_func(f):
    """Label an image from its filename: the Pets dataset capitalizes cat-breed names."""
    return 'cat' if f[0].isupper() else 'dog'
# Build DataLoaders that label each file via label_func; every image is resized to 512x512.
dls=ImageDataLoaders.from_name_func(path,files,label_func,item_tfms=Resize(512)) 
Could not do one pass in your dataloader, there is something wrong in it

learn

# Transfer-learning Learner on a pretrained ResNet-34, tracking error rate.
lrnr=cnn_learner(dls,resnet34,metrics=error_rate)
# One frozen warm-up epoch followed by one unfrozen epoch (matches the two tables below).
lrnr.fine_tune(1)
epoch train_loss valid_loss error_rate time
0 0.178037 0.019520 0.009472 00:34
epoch train_loss valid_loss error_rate time
0 0.039352 0.005454 0.000677 00:41

sample

1st CNN and CAM

# First image in the dataset (a boxer, i.e. a dog).
get_image_files(path)[0]
Path('/home/khy/.fastai/data/oxford-iiit-pet/images/boxer_128.jpg')
img = PILImage.create(get_image_files(path)[0])
img
x, = first(dls.test_dl([img]))  # run the image through the test pipeline -> input tensor
x.shape
torch.Size([1, 3, 512, 512])
# Split the transfer-learned model for CAM:
#   net1 = convolutional body (outputs 512 feature maps),
#   net2 = a minimal CAM-friendly head: global average pool -> flatten -> bias-free linear.
net1=lrnr.model[0]
# NOTE(review): the original bound net2 to lrnr.model[1] here and immediately overwrote it
# on the next statement; that dead assignment has been removed.
net2 = torch.nn.Sequential(
    torch.nn.AdaptiveAvgPool2d(output_size=1),
    torch.nn.Flatten(),
    torch.nn.Linear(512,out_features=2,bias=False))
# Recombine body + new head and train the head (and then the whole net) for 15 epochs.
net=torch.nn.Sequential(net1,net2)
lrnr2=Learner(dls,net,metrics=accuracy)
lrnr2.fine_tune(15)
epoch train_loss valid_loss accuracy time
0 0.259194 5.446237 0.415426 00:41
epoch train_loss valid_loss accuracy time
0 0.115573 0.098918 0.966847 00:41
1 0.084512 0.192782 0.924899 00:41
2 0.108377 0.263869 0.929635 00:41
3 0.108298 0.147747 0.946549 00:41
4 0.092053 0.091769 0.966170 00:41
5 0.071956 0.128430 0.957375 00:41
6 0.085712 0.074232 0.974966 00:41
7 0.055636 0.113164 0.956698 00:41
8 0.049553 0.095807 0.968200 00:41
9 0.032400 0.098665 0.966170 00:41
10 0.021480 0.058845 0.978349 00:41
11 0.014291 0.047291 0.983085 00:41
12 0.009199 0.043928 0.982409 00:41
13 0.008911 0.048288 0.982409 00:41
14 0.006678 0.049579 0.982409 00:41
# Class order of the output logits: index 0 = 'cat', index 1 = 'dog'.
dls.vocab
['cat', 'dog']
  • (고양이,강아지)라고 생각한 확률
# Probabilities the model assigns to (cat, dog) for input x.
a=net(x).tolist()[0][0]
b=net(x).tolist()[0][1]
# Numerically stable softmax: shift both logits by their max before exponentiating so
# np.exp cannot overflow for large-magnitude logits. Mathematically identical to
# exp(a)/(exp(a)+exp(b)) and exp(b)/(exp(a)+exp(b)).
m = max(a, b)
np.exp(a-m)/(np.exp(a-m)+np.exp(b-m)), np.exp(b-m)/(np.exp(a-m)+np.exp(b-m))
(1.752036905842922e-05, 0.9999824796309416)
  • CAM
# Class Activation Map: project the 512 feature maps onto each class's linear weights,
# giving one 16x16 activation map per class.
camimg = torch.einsum('ij,jkl -> ikl', net2[2].weight, net1(x).squeeze())
fig, (ax1, ax2) = plt.subplots(1, 2)
# Overlay each class's CAM (0 = cat, 1 = dog per dls.vocab) on the decoded input image.
for axis, cls in zip((ax1, ax2), (0, 1)):
    dls.train.decode((x,))[0].squeeze().show(ax=axis)
    axis.imshow(camimg[cls].to("cpu").detach(), alpha=0.5, extent=(0, 511, 511, 0),
                interpolation='bilinear', cmap='magma')
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()

2nd CNN and CAM

  • MODE 1 만들기

    • 가중치 재설정
# Shift the dog-class CAM so its minimum is 0, then build complementary weight maps:
# A1 = exp(-k*CAM) is small where the CAM is strong (suppresses the dog region),
# A2 = 1 - A1 is its complement (highlights the dog region). k = 0.015 here.
test=camimg[1]-torch.min(camimg[1])
A1=torch.exp(-0.015*test)  
A2=1-A1
fig, (ax1,ax2) = plt.subplots(1,2) 
# A1: suppression mask
dls.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(A1.data.to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
# A2: complementary highlight mask
dls.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(A2.data.to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
fig.set_figwidth(8)            
fig.set_figheight(8)
fig.tight_layout()
A1.data.to('cpu').shape
torch.Size([16, 16])
# Upsample the 16x16 mask to the 512x512 input resolution and multiply it into the image.
X1=np.array(A1.to("cpu").detach(),dtype=np.float32)
Y1=torch.Tensor(cv2.resize(X1,(512,512),interpolation=cv2.INTER_LINEAR))
x1=x.squeeze().to('cpu')*Y1   # save MODE1 (input with the high-CAM region damped) as x1
fig, (ax1,ax2) = plt.subplots(1,2) 
# MODE1 next to the original for comparison
x1.squeeze().show(ax=ax1)  # MODE1
dls.train.decode((x,))[0].squeeze().show(ax=ax2)
fig.set_figwidth(8)            
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
# Re-batch the masked image and move both sub-networks to CPU so the next forward
# pass runs on the same device as x1 (which was built on CPU above).
x1=x1.reshape(1,3,512,512)
net1.to('cpu')
net2.to('cpu')
Sequential(
  (0): AdaptiveAvgPool2d(output_size=1)
  (1): Flatten(start_dim=1, end_dim=-1)
  (2): Linear(in_features=512, out_features=2, bias=False)
)
# CAM of the masked image (MODE1): 2 class maps of size 16x16.
camimg1 = torch.einsum('ij,jkl -> ikl', net2[2].weight, net1(x1).squeeze())
camimg1.shape
torch.Size([2, 16, 16])
  • CAM

    • mode1에 CAM 결과 올리기
# Overlay the MODE1 CAMs (class 0 left, class 1 right) on the masked image itself.
fig, (ax1, ax2) = plt.subplots(1, 2)
for axis, cls in zip((ax1, ax2), (0, 1)):
    x1.squeeze().show(ax=axis)
    axis.imshow(camimg1[cls].to("cpu").detach(), alpha=0.5, extent=(0, 511, 511, 0),
                interpolation='bilinear', cmap='magma')
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
- 첫번째 CAM결과와 비교
# Compare the dog-class CAM before masking (left) and after masking / MODE1 (right).
fig, (ax1,ax2) = plt.subplots(1,2) 
# original image + first CAM
dls.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(camimg[1].to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
# original image + CAM computed from MODE1
dls.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(camimg1[1].to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
fig.set_figwidth(8)            
fig.set_figheight(8)
fig.tight_layout()
# (cat, dog) probabilities for the masked image x1, via a manual two-class softmax.
a1=net(x1).tolist()[0][0]
b1=net(x1).tolist()[0][1]
np.exp(a1)/(np.exp(a1)+np.exp(b1)), np.exp(b1)/(np.exp(a1)+np.exp(b1))
(0.004487248125871372, 0.9955127518741286)

3rd CNN

  • MODE 2 만들기
# Same masking procedure as MODE1, now applied to the MODE1 CAM with k doubled to 0.03:
# A3 suppresses the (new) high-activation region, A4 = 1 - A3 highlights it.
test1=camimg1[1]-torch.min(camimg1[1])
A3=torch.exp(-0.03*test1)  
A4=1-A3
fig, (ax1,ax2) = plt.subplots(1,2) 
# A3: suppression mask
dls.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(A3.data.to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
# A4: complementary highlight mask
dls.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(A4.data.to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
fig.set_figwidth(8)            
fig.set_figheight(8)
fig.tight_layout()
A3.data.to('cpu').shape
torch.Size([16, 16])
# Upsample the mask to input resolution and apply it on top of MODE1.
X2=np.array(A3.to("cpu").detach(),dtype=np.float32)
Y2=torch.Tensor(cv2.resize(X2,(512,512),interpolation=cv2.INTER_LINEAR))
x2=x1.squeeze().to('cpu')*Y2   # save MODE2 as x2
fig, (ax1,ax2,ax3) = plt.subplots(1,3) 
# MODE2, MODE1 and the original, side by side
x2.show(ax=ax1)  # MODE2
x1.squeeze().show(ax=ax2)  # MODE1
dls.train.decode((x,))[0].squeeze().show(ax=ax3)
fig.set_figwidth(12)            
fig.set_figheight(12)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
# Re-batch MODE2 and keep both sub-networks on CPU for the next forward pass.
x2=x2.reshape(1,3,512,512)
net1.to('cpu')
net2.to('cpu')
Sequential(
  (0): AdaptiveAvgPool2d(output_size=1)
  (1): Flatten(start_dim=1, end_dim=-1)
  (2): Linear(in_features=512, out_features=2, bias=False)
)
# CAM of MODE2.
camimg2 = torch.einsum('ij,jkl -> ikl', net2[2].weight, net1(x2).squeeze())
  • CAM

    • mode2에 CAM 결과 올리기
# Overlay the MODE2 CAMs (class 0 left, class 1 right) on the twice-masked image.
fig, (ax1, ax2) = plt.subplots(1, 2)
for axis, cls in zip((ax1, ax2), (0, 1)):
    x2.squeeze().show(ax=axis)
    axis.imshow(camimg2[cls].to("cpu").detach(), alpha=0.5, extent=(0, 511, 511, 0),
                interpolation='bilinear', cmap='magma')
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
- 첫번째, 두번째 CAM결과와 비교
# Compare the dog-class CAM across the three stages: original, MODE1, MODE2.
fig, (ax1,ax2,ax3) = plt.subplots(1,3) 
# original CAM
dls.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(camimg[1].to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
# CAM of MODE1
dls.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(camimg1[1].to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
# CAM of MODE2
dls.train.decode((x,))[0].squeeze().show(ax=ax3)
ax3.imshow(camimg2[1].to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
fig.set_figwidth(16)            
fig.set_figheight(16)
fig.tight_layout()
# (cat, dog) probabilities for MODE2, via a manual two-class softmax.
a2=net(x2).tolist()[0][0]
b2=net(x2).tolist()[0][1]
np.exp(a2)/(np.exp(a2)+np.exp(b2)), np.exp(b2)/(np.exp(a2)+np.exp(b2))
(0.026369678868924343, 0.9736303211310757)

4th CNN

  • MODE 3 만들기
# Third masking round on the MODE2 CAM, k doubled again to 0.06:
# A5 suppresses the current high-activation region, A6 = 1 - A5 highlights it.
test2=camimg2[1]-torch.min(camimg2[1])
A5=torch.exp(-0.06*test2)  
A6=1-A5
fig, (ax1,ax2) = plt.subplots(1,2) 
# A5: suppression mask
dls.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(A5.data.to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
# A6: complementary highlight mask
dls.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(A6.data.to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
fig.set_figwidth(8)            
fig.set_figheight(8)
fig.tight_layout()
A5.data.to('cpu').shape
torch.Size([16, 16])
# Upsample the mask and apply it on top of MODE2.
X3=np.array(A5.to("cpu").detach(),dtype=np.float32)
Y3=torch.Tensor(cv2.resize(X3,(512,512),interpolation=cv2.INTER_LINEAR))
x3=x2.squeeze().to('cpu')*Y3   # save MODE3 as x3
fig, (ax1,ax2,ax3,ax4) = plt.subplots(1,4) 
# MODE3, MODE2, MODE1 and the original, side by side
x3.show(ax=ax1)  # MODE3
x2.squeeze().show(ax=ax2)  # MODE2
x1.squeeze().show(ax=ax3)  # MODE1
dls.train.decode((x,))[0].squeeze().show(ax=ax4)
fig.set_figwidth(16)            
fig.set_figheight(16)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
# Re-batch MODE3 and keep both sub-networks on CPU for the next forward pass.
x3=x3.reshape(1,3,512,512)
net1.to('cpu')
net2.to('cpu')
Sequential(
  (0): AdaptiveAvgPool2d(output_size=1)
  (1): Flatten(start_dim=1, end_dim=-1)
  (2): Linear(in_features=512, out_features=2, bias=False)
)
# CAM of MODE3.
camimg3 = torch.einsum('ij,jkl -> ikl', net2[2].weight, net1(x3).squeeze())
  • CAM

    • mode3에 CAM 결과 올리기
# Overlay the MODE3 CAMs (class 0 left, class 1 right) on the thrice-masked image.
fig, (ax1, ax2) = plt.subplots(1, 2)
for axis, cls in zip((ax1, ax2), (0, 1)):
    x3.squeeze().show(ax=axis)
    axis.imshow(camimg3[cls].to("cpu").detach(), alpha=0.5, extent=(0, 511, 511, 0),
                interpolation='bilinear', cmap='magma')
fig.set_figwidth(8)
fig.set_figheight(8)
fig.tight_layout()
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
Clipping input data to the valid range for imshow with RGB data ([0..1] for floats or [0..255] for integers).
- 첫번째, 두번째, 세번째 CAM결과와 비교
# Compare the dog-class CAM across all four stages: original, MODE1, MODE2, MODE3.
fig, (ax1,ax2,ax3,ax4) = plt.subplots(1,4) 
# original CAM
dls.train.decode((x,))[0].squeeze().show(ax=ax1)
ax1.imshow(camimg[1].to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
# CAM of MODE1
dls.train.decode((x,))[0].squeeze().show(ax=ax2)
ax2.imshow(camimg1[1].to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
# CAM of MODE2
dls.train.decode((x,))[0].squeeze().show(ax=ax3)
ax3.imshow(camimg2[1].to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
# CAM of MODE3
dls.train.decode((x,))[0].squeeze().show(ax=ax4)
ax4.imshow(camimg3[1].to("cpu").detach(),alpha=0.5,extent=(0,511,511,0),interpolation='bilinear',cmap='magma')
fig.set_figwidth(16)            
fig.set_figheight(16)
fig.tight_layout()
# (cat, dog) probabilities for MODE3, via a manual two-class softmax.
a3=net(x3).tolist()[0][0]
b3=net(x3).tolist()[0][1]
np.exp(a3)/(np.exp(a3)+np.exp(b3)), np.exp(b3)/(np.exp(a3)+np.exp(b3))
(0.052085219034464114, 0.9479147809655359)

  • CNN, CAM 횟수가 증가함에 따라 exp(-k*test)의 k를 2배씩 해주었음.$\to$ 2배로 증가시키니 k값을 유지했을 때보다 강아지라고 인식한 곳이 잘 이동함.
  • lrnr2 epoch수를 늘려 과적합시킬수록 강아지라고 인식한 곳의 이동이 확연히 드러남.